More TLB-flush cleanups. Simplify and rationalise the interface.
u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];
-static inline void tlb_clocktick(unsigned int cpu)
+void tlb_clocktick(void)
{
u32 y, ny;
}
while ( unlikely((ny = cmpxchg(&tlbflush_clock, y-1, y)) != y-1) );
- /* Update cpu's timestamp to new time. */
- tlbflush_time[cpu] = y;
+ /* Update this CPU's timestamp to new time. */
+ tlbflush_time[smp_processor_id()] = y;
}
-
-void write_cr3_counted(unsigned long pa)
-{
- __asm__ __volatile__ (
- "movl %0, %%cr3"
- : : "r" (pa) : "memory" );
- tlb_clocktick(smp_processor_id());
-}
-
-void flush_tlb_counted(void)
-{
- __asm__ __volatile__ (
- "movl %%cr3, %%eax; movl %%eax, %%cr3"
- : : : "memory", "eax" );
- tlb_clocktick(smp_processor_id());
-}
-
}
}
- /* Switch page tables. */
- write_ptbase( &next_p->mm );
+ /* Switch page tables. */
+ write_ptbase(&next_p->mm);
+ tlb_clocktick();
set_current(next_p);
#undef CD
/* Install correct page table. */
- __asm__ __volatile__ ("movl %%eax,%%cr3"
- : : "a" (pagetable_val(current->mm.pagetable)));
+ write_ptbase(&current->mm);
init_idle_task();
}
/* Install the new page tables. */
__cli();
- write_cr3_counted(pagetable_val(p->mm.pagetable));
+ write_ptbase(&p->mm);
/* Copy the OS image. */
(void)loadelfimage(image_start);
*dst = '\0';
/* Reinstate the caller's page tables. */
- write_cr3_counted(pagetable_val(current->mm.pagetable));
+ write_ptbase(&current->mm);
__sti();
/* Destroy low mappings - they were only for our convenience. */
write_ptbase(&current->mm);
put_page_and_type(&frame_table[old_base_pfn]);
+
+ /*
+ * Note that we tick the clock /after/ dropping the old base's
+ * reference count. If the page tables got freed then this will
+ * avoid unnecessary TLB flushes when the pages are reused.
+ */
+ tlb_clocktick();
}
else
{
percpu_info[cpu].deferred_ops = 0;
if ( deferred_ops & DOP_FLUSH_TLB )
- {
- write_ptbase(&current->mm);
- }
+ local_flush_tlb();
if ( deferred_ops & DOP_RELOAD_LDT )
(void)map_ldt_shadow_page(0);
if ( unlikely(deferred_ops & DOP_FLUSH_TLB) ||
unlikely(flags & UVMF_FLUSH_TLB) )
- {
- write_ptbase(&p->mm);
- }
+ local_flush_tlb();
else if ( unlikely(flags & UVMF_INVLPG) )
__flush_tlb_one(page_nr << PAGE_SHIFT);
extern u32 tlbflush_clock;
extern u32 tlbflush_time[NR_CPUS];
+extern void tlb_clocktick(void);
extern void new_tlbflush_clock_period(void);
-extern void write_cr3_counted(unsigned long pa);
-extern void flush_tlb_counted(void);
-
#endif /* __FLUSHTLB_H__ */
extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
extern void paging_init(void);
-#define __flush_tlb() flush_tlb_counted()
+#define __flush_tlb() \
+ do { \
+ __asm__ __volatile__ ( \
+ "movl %%cr3, %%eax; movl %%eax, %%cr3" \
+ : : : "memory", "eax" ); \
+ tlb_clocktick(); \
+ } while ( 0 )
/* Flush global pages as well. */
#define __flush_tlb_pge() \
do { \
__pge_off(); \
- flush_tlb_counted(); \
+ __flush_tlb(); \
__pge_on(); \
} while (0)
#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
-#define load_cr3(pgdir) \
- asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)));
-
/*
* Save the cr4 feature set we're using (ie
* Pentium 4MB enable and PPro Global page
char gdt[6];
};
-static inline void write_ptbase( struct mm_struct *m )
+static inline void write_ptbase(struct mm_struct *mm)
{
- if ( unlikely(m->shadow_mode) )
- write_cr3_counted(pagetable_val(m->shadow_table));
+ unsigned long pa;
+
+ if ( unlikely(mm->shadow_mode) )
+ pa = pagetable_val(mm->shadow_table);
else
- write_cr3_counted(pagetable_val(m->pagetable));
+ pa = pagetable_val(mm->pagetable);
+
+ __asm__ __volatile__ ( "movl %0, %%cr3" : : "r" (pa) : "memory" );
}
#define IDLE0_MM \
extern u32 tlbflush_clock;
extern u32 tlbflush_time[NR_CPUS];
+extern void tlb_clocktick(void);
extern void new_tlbflush_clock_period(void);
-extern void write_cr3_counted(unsigned long pa);
-extern void flush_tlb_counted(void);
-
#endif /* __FLUSHTLB_H__ */
extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE];
extern void paging_init(void);
-#define __flush_tlb() flush_tlb_counted()
+#define __flush_tlb() \
+ do { \
+ __asm__ __volatile__ ( \
+ "movl %%cr3, %%eax; movl %%eax, %%cr3" \
+ : : : "memory", "eax" ); \
+ tlb_clocktick(); \
+ } while ( 0 )
/* Flush global pages as well. */
#define __flush_tlb_pge() \
do { \
__pge_off(); \
- flush_tlb_counted(); \
+ __flush_tlb(); \
__pge_on(); \
} while (0)